Lane Detection Pipeline Based on OpenCV

The goals / steps of this project are the following:

  • Compute the camera calibration matrix and distortion coefficients given a set of chessboard images.
  • Apply a distortion correction to raw images.
  • Use color transforms, gradients, etc., to create a thresholded binary image.
  • Apply a perspective transform to rectify binary image ("birds-eye view").
  • Detect lane pixels and fit to find the lane boundary.
  • Determine the curvature of the lane and vehicle position with respect to center.
  • Warp the detected lane boundaries back onto the original image.
  • Output visual display of the lane boundaries and numerical estimation of lane curvature and vehicle position.

Importing Necessary Libraries

In [1]:
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from matplotlib.pyplot import figure, imshow, axis
from matplotlib.image import imread

import numpy as np
import cv2
from glob import glob
import PIL.Image
from IPython.display import Image, HTML, display
import matplotlib.colors
import colorsys

import os
from os import listdir
from os.path import isfile, join

## Importing the necessary libs

import numpy as np
import pandas as pd
import csv
import cv2
import sklearn
import random, pylab

from moviepy.editor import VideoFileClip
from IPython.display import HTML

Loading & Displaying Calibration & Test Images

In [2]:
## This API can display any list of images with corresponding labels
def display_DataSample(dataSet, file_names=None, cmap=None):
    """Display a list of images in a two-column grid.

    Parameters
    ----------
    dataSet : sequence of images (anything ``imshow`` accepts).
    file_names : optional sequence of titles, one per image; shown when given
        (the original accepted but silently ignored this parameter).
    cmap : optional matplotlib colormap forwarded to ``imshow``.
    """
    fig = plt.figure(figsize=(30, 45))

    # Two columns, enough rows for every image. The original hard-coded a
    # 10x2 grid, which raised for datasets of more than 20 images.
    n_cols = 2
    n_rows = max((len(dataSet) + n_cols - 1) // n_cols, 1)

    for i, image in enumerate(dataSet):
        sub = fig.add_subplot(n_rows, n_cols, i + 1)
        sub.imshow(image, cmap, interpolation='nearest')
        if file_names is not None:
            sub.set_title(file_names[i])

    fig.tight_layout()
    plt.show()

    return
In [3]:
## Import calibration images (chessboard photos used for camera calibration)
calib_img_list = glob('camera_cal/*.jpg')
calib_img_list = list(map(mpimg.imread, calib_img_list))

## Import test images (road frames used to tune the detection pipeline)
test_img_list = glob('test_images/*.jpg')
test_img_list = list(map(mpimg.imread, test_img_list))
In [4]:
display_DataSample(calib_img_list)  # visual check of the raw calibration set
In [5]:
display_DataSample(test_img_list)  # visual check of the raw test frames

Camera Calibration

The camera calibration step calculates the camera parameters used later in the image undistortion process. These parameters are:

  • Camera matrix.
  • Distortion Coeffs.
  • Camera position in the world in polar & cartesian coordinates.
In [6]:
## Camera Calibration API
##Images 1,4,5 are neglected as they don't conform to the mesh grid (9,6) size

def camera_calib(calib_img_list):
    """Compute the camera matrix and distortion coefficients from chessboard images.

    Images in which the full 9x6 inner-corner grid cannot be found are
    skipped (e.g. images 1, 4 and 5 of the calibration set do not conform
    to the (9, 6) grid size).

    Parameters
    ----------
    calib_img_list : list of RGB chessboard calibration images.

    Returns
    -------
    calib_output_list : annotated COPIES of the usable images with the
        detected corners drawn (the original drew onto the caller's images,
        because ``cv2.drawChessboardCorners`` mutates its input).
    mtx : camera matrix.
    dist : distortion coefficients.

    Raises
    ------
    ValueError : if ``calib_img_list`` is empty (the original hit a
        ``NameError`` on ``gray`` instead).
    """
    chess_length = 9   # inner corners along the long side of the board
    chess_width = 6    # inner corners along the short side
    coordinates = 3    # (x, y, z) object-space coordinates

    objpoints = []          # 3-D points in object space (board plane, z = 0)
    imgpoints = []          # matching 2-D corner points in image space
    calib_output_list = []  # annotated images for visual inspection

    # Object-space grid (0,0,0), (1,0,0) ... (8,5,0); identical for every image.
    objp = np.zeros((chess_length * chess_width, coordinates), np.float32)
    objp[:, :2] = np.mgrid[0:chess_length, 0:chess_width].T.reshape(-1, 2)

    gray = None
    ## Aggregate imgpoints & objpoints from all the calibration images
    for img in calib_img_list:
        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        ret, corners = cv2.findChessboardCorners(gray, (chess_length, chess_width), None)

        # If the full corner grid was found, record the point correspondences.
        if ret:
            imgpoints.append(corners)
            objpoints.append(objp)
            # Draw on a copy so the caller's image stays pristine.
            annotated = img.copy()
            cv2.drawChessboardCorners(annotated, (chess_length, chess_width), corners, ret)
            calib_output_list.append(annotated)

    if gray is None:
        raise ValueError("camera_calib requires at least one calibration image")

    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
        objpoints, imgpoints, gray.shape[::-1], None, None)

    return calib_output_list, mtx, dist
In [7]:
calib_output_list, mtx, dist = camera_calib(calib_img_list)  # calibrate once; mtx/dist are reused below
In [33]:
display_DataSample(calib_output_list)  # images with detected chessboard corners drawn

Image Undistortion

In [37]:
##Images Undistortion API

def undistort(img, mtx=mtx, dist=dist):
    """Remove lens distortion from *img* using the calibrated camera model.

    The module-level camera matrix and distortion coefficients produced by
    ``camera_calib`` are captured as the parameter defaults at definition time.
    """
    return cv2.undistort(img, mtx, dist, None, mtx)
In [47]:
## Undistortion example: the same calibration image before and after
## applying the camera model, side by side
undistorted_img_sample = undistort(calib_img_list[10])
plt.subplot(1,2,1)
plt.title("Distorted Image")
plt.imshow(calib_img_list[10])
plt.subplot(1,2,2)
plt.title("Un-Distorted Image")
plt.imshow(undistorted_img_sample)
Out[47]:
<matplotlib.image.AxesImage at 0x11c41da0>

Lanes Extractor Pipeline

This is the main pipeline. I started by defining all the APIs I experimented with to make the pipeline perform better, even those I ended up not using.

1- Gradient & Color Spaces Combined Filters

In [9]:
def abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):
    """Binary image from the thresholded absolute Sobel gradient of *img*.

    Parameters
    ----------
    img : RGB image.
    orient : 'x' or 'y' — axis along which the gradient is taken.
    sobel_kernel : odd Sobel kernel size.
    thresh : inclusive (low, high) range on the 0-255 rescaled gradient.
        (The original default of ``None`` always raised ``TypeError`` when
        the function was called without an explicit threshold.)

    Returns
    -------
    uint8 binary image — 1 where the scaled gradient falls inside *thresh*.

    Raises
    ------
    ValueError : if *orient* is neither 'x' nor 'y' (the original silently
        fell through to a ``NameError`` instead).
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Absolute x or y gradient via the OpenCV Sobel() operator.
    if orient == 'x':
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel))
    elif orient == 'y':
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel))
    else:
        raise ValueError("orient must be 'x' or 'y', got %r" % (orient,))

    # Rescale to 8 bit so the threshold range is contrast-independent.
    scaled_sobel = np.uint8(255*(abs_sobel/np.max(abs_sobel)))

    # Inclusive (>=, <=) thresholds.
    binary_output = np.zeros_like(scaled_sobel)
    binary_output[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1

    return binary_output
In [10]:
# Define a function to return the magnitude of the gradient
# for a given sobel kernel size and threshold values
def mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):
    """Binary image where the Sobel gradient magnitude lies in *mag_thresh*.

    The magnitude is rescaled to 0-255 before thresholding, so the bounds
    are independent of the image's overall contrast.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

    # Sobel gradients along both axes.
    grad_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    grad_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)

    # Euclidean magnitude, rescaled into an 8-bit range.
    magnitude = np.sqrt(grad_x**2 + grad_y**2)
    magnitude = (magnitude / (np.max(magnitude) / 255)).astype(np.uint8)

    # Ones where the threshold is met, zeros otherwise.
    lo, hi = mag_thresh
    binary_output = np.zeros_like(magnitude)
    binary_output[(magnitude >= lo) & (magnitude <= hi)] = 1

    return binary_output
In [11]:
# Define a function to threshold an image for a given range and Sobel kernel
def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):
    # Grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Calculate the x and y gradients
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Take the absolute value of the gradient direction, 
    # apply a threshold, and create a binary image result
    absgraddir = np.arctan2(np.absolute(sobely), np.absolute(sobelx))
    binary_output =  np.zeros_like(absgraddir)
    binary_output[(absgraddir >= thresh[0]) & (absgraddir <= thresh[1])] = 1

    # Return the binary image
    return binary_output
In [12]:
##Create different combinatons of gradient magnitude and direction filters
def grad_combine(img,ksize=3):
    
    # Apply each of the thresholding functions
    gradx = abs_sobel_thresh(img, orient='x', sobel_kernel=ksize, thresh=(20, 100))
    grady = abs_sobel_thresh(img, orient='y', sobel_kernel=ksize, thresh=(20, 100))
    mag_binary = mag_thresh(img, sobel_kernel=ksize, mag_thresh=(20, 100))
    dir_binary = dir_threshold(img, sobel_kernel=ksize, thresh=(0.7, 1.3))
    
    combined = np.zeros_like(dir_binary)
    combined[((gradx == 1) & (grady == 1)) & ((mag_binary == 1) & (dir_binary == 1))] = 1
    
    return combined
    
In [87]:
# Define a function that thresholds the S-channel & L-channel of HLS color space
def hls_select(img, thresh_s=None, thresh_l=None):
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    s_channel = hls[:,:,2]
    l_channel = hls[:,:,1]
    
    binary_output = np.zeros_like(s_channel)
    binary_output[((s_channel > thresh_s[0]) & (s_channel <= thresh_s[1])) & ((l_channel > thresh_l[0]) & (l_channel <= thresh_l[1]))] = 1
    
    #As you can see I've tried many combinations till reached to a satisfactory output
    #binary_output[((l_channel > thresh_l[0]) & (l_channel <= thresh_l[1]))] = 1
    #binary_output[((s_channel > thresh_s[0]) & (s_channel <= thresh_s[1]))] = 1
    
    return binary_output
In [88]:
## Define a function that combines gradient and color space filters
def grad_hls_combine(img, ksize=3):
    
    sl_binary = hls_select(img, thresh_s=(100, 255), thresh_l=(100, 255))
    sx_binary = abs_sobel_thresh(img, orient='x', sobel_kernel=ksize, thresh=(100, 255))
    grad_comb = grad_combine(img,ksize=ksize)
    
    #As you can see I've tried many combinations of color channels and gradients and gradient thresholds till reached
    #a satisfactory output 
    binary_output = np.zeros_like(sl_binary)
    binary_output[(sl_binary == 1) | (sx_binary == 1)] = 1
    #binary_output[(sl_binary == 1)] = 1
    return binary_output

Visualizing different preprocessing combinations

  • Note: I've added extra test images taken from frames that showed poor detection output, in order to capture the difficult cases.
In [82]:
#1- S channel only output
#ksize_list = [3 for i in test_img_list]
# Threshold every test image with the combined color/gradient filter
binary_img_list = []
for i in range(len(test_img_list)):
    binary_img_list.append(grad_hls_combine(test_img_list[i], ksize=3))

display_DataSample(binary_img_list)
In [86]:
#2- combining S & sobel_x outputs
# Re-run the thresholding after tuning (S channel + sobel_x combination)
binary_img_list = []
for i in range(len(test_img_list)):
    binary_img_list.append(grad_hls_combine(test_img_list[i], ksize=3))

display_DataSample(binary_img_list)
In [89]:
#3- Combining the S, L & sobel_x outputs to improve the detection of the light lane line; I also tuned (increased) the gradient
#threshold range to minimize the resulting noise as much as possible
# Final thresholding pass (S, L & sobel_x) used by the pipeline below
binary_img_list = []
for i in range(len(test_img_list)):
    binary_img_list.append(grad_hls_combine(test_img_list[i], ksize=3))

display_DataSample(binary_img_list)

2- Perspective Transformation

In [48]:
## Bird's Eye Perspective Transformation API
def persp_tarnsform(img):
    
    img_size = (img.shape[1], img.shape[0])

## Also, here I've tried many values for src & dist points and I chose to keep all of them for future enhancements in this 
#pipeline.

##These values are extracted manually from the test images by trial and error methodology 
    bottom_left_src = [210, 720]
    bottom_right_src = [1110, 720]
    top_left_src = [565, 470]
    top_right_src = [720, 470]
    
##These values are used as per some recommendations on Udacity forums     
    bottom_left = [320,720] 
    bottom_right = [920, 720]
    top_left = [320, 1]
    top_right = [920, 1]
    
    #src = np.float32([bottom_left_src,bottom_right_src,top_right_src,top_left_src])
    #dst = np.float32([bottom_left,bottom_right,top_right,top_left])
    
    ################
    w,h = 1280,720
    x,y = 0.5*w, 0.8*h
    src = np.float32([[200./1280*w,720./720*h],
                  [453./1280*w,547./720*h],
                  [835./1280*w,547./720*h],
                  [1100./1280*w,720./720*h]])
    dst = np.float32([[(w-x)/2.,h],
                  [(w-x)/2.,0.82*h],
                  [(w+x)/2.,0.82*h],
                  [(w+x)/2.,h]])

    
    ## Define the region of interest
    #src = np.float32([[190, 700], [1110, 700], [720, 470], [570, 470]])
    #
    #bottom_left = src[0][0]+100, src[0][1]
    #bottom_right = src[1][0]-200, src[1][1]
    #top_left = src[3][0]-250, 1
    #top_right = src[2][0]+200, 1
    #dst = np.float32([bottom_left, bottom_right, top_right, top_left])
    ################################
    
    M = cv2.getPerspectiveTransform(src, dst)
    Minv = cv2.getPerspectiveTransform(dst, src)
    binary_warped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)
    
    ## This is one of the methods I've tried to eliminate the noise on the image sides but it's not used for now.
    ## Neglect any data on the outer shelter of the image (shelter thickness = 50 pixels) as this segments are more likely to 
    ## cause unwanted noise
    #binary_warped[0:50, :] = 0
    #binary_warped[:, 0:50] = 0
    #binary_warped[(img.shape[0] - 50):img.shape[0], :] = 0
    #binary_warped[:, (img.shape[1] - 50):img.shape[1]] = 0
    
    return binary_warped , Minv

Visualizing Bird's Eye View after the preprocessing

In [95]:
# Warp every thresholded test image to the bird's-eye view
warped_img_list = []
for i in range(len(binary_img_list)):
    warped_img, M_inv = persp_tarnsform(binary_img_list[i])
    warped_img_list.append(warped_img)

display_DataSample(warped_img_list, cmap='gray')
In [17]:
## This API is not used in my pipeline but it can show us the strong edges in the bottom half of the image graphically as shown 
## below
def img_hist(img):
    """Plot the column-sum histogram of the bottom half of a binary image.

    The two peaks of this histogram mark the most likely x positions of the
    lane lines. (The original called ``plt.show()`` BEFORE ``plt.plot``,
    which displayed an empty figure; the order is fixed here.)
    """
    histogram = np.sum(img[img.shape[0]//2:,:], axis=0)
    plt.plot(histogram)
    plt.show()
In [53]:
## Visualizing preprocessed frames' histograms
# NOTE(review): frame_processor is defined in the next cell; this cell only
# runs after that definition has been executed (notebook cells ran out of order).
for i in range(len(test_img_list)):
    bird_eye_img , undistorted_img, M_inv = frame_processor(test_img_list[i])
    img_hist(bird_eye_img)

The Main Pipeline APIs

In [16]:
## This API handles all the preprocessing of each frame
def frame_processor(img, ksize=3):
    """Run the per-frame preprocessing chain.

    Undistort -> combined color/gradient threshold -> bird's-eye warp.

    Returns the warped binary image, the undistorted frame, and the inverse
    perspective matrix needed to project detections back onto the frame.
    """
    undistorted_img = undistort(img)
    thresholded = grad_hls_combine(undistorted_img, ksize=ksize)
    bird_eye_img, Minv = persp_tarnsform(thresholded)

    return bird_eye_img, undistorted_img, Minv
In [54]:
## this API applies the sliding window search approach for detecting the lane lines without dependency on a previous fitted polynomials
def blind_slide_window_detector(binary_warped):
    """Find both lane lines from scratch with a sliding-window search.

    Used whenever there is no trustworthy previous fit to search around.

    Parameters
    ----------
    binary_warped : bird's-eye-view binary image (lane pixels == 1).

    Returns
    -------
    out_img : RGB visualization (search windows drawn, lane pixels colored).
    ploty : y coordinates (one per image row) the fits are evaluated at.
    left_fit, right_fit : second-order polynomial coefficients of x = f(y).
    left_fitx, right_fitx : fitted x values evaluated at ``ploty``.
    left_lane_inds, right_lane_inds : indices into the image's nonzero
        pixels belonging to each line.

    Note: ``np.int`` (removed in NumPy 1.24) is replaced with the builtin
    ``int`` throughout — the original raised ``AttributeError`` on modern
    NumPy. ``np.polyfit`` still raises if a line gets zero pixels.
    """
    # Histogram of the bottom half: its two peaks are the most likely base
    # positions of the left and right lane lines.
    histogram = np.sum(binary_warped[binary_warped.shape[0]//2:, :], axis=0)
    # Output image to draw on and visualize the result.
    out_img = np.dstack((binary_warped, binary_warped, binary_warped)) * 255
    midpoint = int(histogram.shape[0] / 2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint

    nwindows = 9                                        # vertical window count
    window_height = int(binary_warped.shape[0] / nwindows)
    margin = 100                                        # window half-width
    minpix = 50                                         # pixels needed to re-center

    # Coordinates of every nonzero pixel, indexed once up front.
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])

    leftx_current = leftx_base
    rightx_current = rightx_base
    left_lane_inds = []
    right_lane_inds = []

    # Walk the windows from the bottom of the image upward.
    for window in range(nwindows):
        win_y_low = binary_warped.shape[0] - (window + 1) * window_height
        win_y_high = binary_warped.shape[0] - window * window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin

        # Draw the windows on the visualization image.
        cv2.rectangle(out_img, (win_xleft_low, win_y_low),
                      (win_xleft_high, win_y_high), (0, 255, 0), 2)
        cv2.rectangle(out_img, (win_xright_low, win_y_low),
                      (win_xright_high, win_y_high), (0, 255, 0), 2)

        # Nonzero pixels falling inside each window.
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                          (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                           (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)

        # If enough pixels were found, re-center the next window on their mean x.
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))

    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)

    # Extract left and right line pixel positions.
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]

    # Second-order fits x = f(y) for each line.
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)

    # Evaluate the fits at every image row for plotting/highlighting.
    ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0])
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]

    # Color detected pixels: left line red, right line blue.
    out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
    out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]

    return out_img, ploty, left_fit, right_fit, left_fitx, right_fitx, left_lane_inds, right_lane_inds
In [57]:
def slide_window_detector(binary_warped, left_fit, right_fit):
    """Find lane pixels by searching a margin around previous polynomial fits.

    Faster alternative to the blind search: instead of sliding windows, lane
    pixels are taken from a +/- ``margin`` band around the polynomials fitted
    on an earlier frame, then refit.

    Parameters: *binary_warped* is the bird's-eye binary image; *left_fit* /
    *right_fit* are the previous second-order coefficients of x = f(y).
    Returns the same tuple shape as ``blind_slide_window_detector``, except
    the index outputs here are boolean masks over the nonzero pixels rather
    than integer indices (both index correctly with NumPy fancy indexing).
    Note: ``np.polyfit`` raises if the band captures zero pixels.
    """
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    margin = 100
    # Boolean masks: nonzero pixels within +/- margin of each previous fit.
    left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + 
    left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) + 
    left_fit[1]*nonzeroy + left_fit[2] + margin))) 
    
    right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + 
    right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) + 
    right_fit[1]*nonzeroy + right_fit[2] + margin)))  
    
    # Extract left and right line pixel positions.
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds] 
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    
    
    # Refit a second-order polynomial x = f(y) to each line.
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    
    # Evaluate the new fits at every image row.
    ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    
    
    ############################################
    # Visualization only from here down.
    ##############################################
    
    # Create an image to draw on and an image to show the selection window.
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
    window_img = np.zeros_like(out_img)
    # Color the detected pixels: left line red, right line blue.
    out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
    out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
    
    # Generate a polygon to illustrate the search-band area: one edge down
    # the inner boundary, the flipped edge back up the outer boundary, so
    # fillPoly traces the perimeter in order.
    left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
    left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin, 
                                  ploty])))])
    left_line_pts = np.hstack((left_line_window1, left_line_window2))
    right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
    right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin, 
                                  ploty])))])
    right_line_pts = np.hstack((right_line_window1, right_line_window2))
    
    # Draw the search bands onto the blank image and blend at 30% opacity.
    cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255, 0))
    cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255, 0))
    result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
    
    return result, ploty, left_fit, right_fit, left_fitx, right_fitx, left_lane_inds, right_lane_inds
In [58]:
# this API fills/highlight the region of interest depending on the sliding window search results
def lane_highlight(binary_warped, undistorted_img, ploty, left_x, right_x, Minv):

    # Create an image to draw the lines on
    warp_zero = np.zeros_like(binary_warped).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
    
    # Recast the x and y points into usable format for cv2.fillPoly()
    pts_left = np.array([np.transpose(np.vstack([left_x, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_x, ploty])))])
    pts = np.hstack((pts_left, pts_right))
    
    # Draw the lane onto the warped blank image
    cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
    
    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    newwarp = cv2.warpPerspective(color_warp, Minv, (color_warp.shape[1], color_warp.shape[0])) 
    # Combine the result with the original image
    result = cv2.addWeighted(undistorted_img, 1, newwarp, 0.3, 0)
                    
    return result
In [59]:
## this API Determines the lane curvature & shifting from the lane center of each frame
def lane_curvature(lane_img, ploty, leftx, rightx):
    """Annotate *lane_img* with radius of curvature and lateral offset.

    Parameters
    ----------
    lane_img : image to annotate in place (also returned).
    ploty : y positions (pixels) the line fits were evaluated at.
    leftx, rightx : fitted x positions (pixels) of the left/right lines,
        one per entry of ``ploty``.

    Returns
    -------
    (current_lane_curvature, lane_img) — curvature in meters, annotated image.
    """
    # Pixel-to-meter conversions for a standard US lane and this camera setup.
    ym_per_pix = 30/720 # meters per pixel in y dimension
    xm_per_pix = 3.7/700 # meters per pixel in x dimension

    # Radius at the maximum y (bottom of the image, closest to the vehicle).
    y_eval = np.max(ploty)

    # Refit in world (meter) space so the radius comes out in meters.
    left_fit_cr = np.polyfit(ploty*ym_per_pix, leftx*xm_per_pix, 2)
    right_fit_cr = np.polyfit(ploty*ym_per_pix, rightx*xm_per_pix, 2)
    left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
    right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])

    current_lane_curvature = (left_curverad + right_curverad) / 2

    ## Vehicle offset from the lane center. Assumes the camera is mounted at
    ## the lateral center of the vehicle, so the image center is the vehicle
    ## center.
    camera_img_center = np.uint16(lane_img.shape[1]/2)
    left_spacing = []
    right_spacing = []
    for i in range(10):
        # Sample each line with its own in-range index. The original drew a
        # single index from len(leftx) and reused it to subscript rightx,
        # which raised IndexError whenever rightx was the shorter array.
        left_spacing.append(camera_img_center - leftx[random.randint(0, len(leftx) - 1)])
        right_spacing.append(rightx[random.randint(0, len(rightx) - 1)] - camera_img_center)

    shift_space = [x-y for (x,y) in zip(left_spacing, right_spacing)]
    veh_shift_m = (sum(shift_space)/10) * xm_per_pix

    if veh_shift_m > 0:
        cv2.putText(lane_img, "Vehicle is %.3f meters left of the lane center" % abs(veh_shift_m), (250, 150), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255,255,255), 4)
    elif veh_shift_m < 0:
        cv2.putText(lane_img, "Vehicle is %.3f meters right of the lane center" % abs(veh_shift_m), (250, 150), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255,255,255), 4)
    else:
        cv2.putText(lane_img, "Vehicle is centered on the lane", (250, 300), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255,255,255), 4)

    cv2.putText(lane_img, "Current Radius of Curvature = %.2f Meters" % current_lane_curvature, (250, 100), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255,255,255), 4)
    return current_lane_curvature, lane_img
In [60]:
## This API applies three helpful sanity checks on each frame's results:
## 1- Assuring that there's no big "illogical" change between the current and previous lane curvature, with an allowable margin of
# change = 50 m.
# 2- Assuring that the lane lines' spacing is within an acceptable deviation from the standard spacing: +/- 1 m.
# 3- Assuring that the lane lines' slopes are roughly within range by determining the slopes of random sets of left & right line
# points and taking their average.

def sanity_checks(current_lane_curvature, prev_lane_curvature, left_fit, right_fit, left_x, right_x):
    """Run three per-frame plausibility checks on the fitted lane lines.

    1. Curvature continuity: |current - previous| radius < 50 m.
    2. Lane width: average sampled width within +/- 1 m of 3.7 m.
    3. Parallelism: average fitted slopes of the two lines differ by < 0.5.

    Returns the tuple of booleans
    ``(curvature_check, lanes_spacing, slope_check)``.
    """
    ## 1- Curvature continuity check
    if np.float32(abs(np.float32(current_lane_curvature) - prev_lane_curvature)) < 50:
        curvature_check = True
    else:
        curvature_check = False

    ## 2- Lane lines spacing check
    xm_per_pix = 3.7/700 # meters per pixel in x dimension
    min_lane_width = 3.7
    lane_width_pxl = []

    for i in range(10):
        # Draw an in-range index for each line separately. The original drew
        # one index from len(left_x) and reused it for right_x, raising
        # IndexError whenever right_x was the shorter array.
        li = random.randint(0, len(left_x) - 1)
        ri = random.randint(0, len(right_x) - 1)
        lane_width_pxl.append(right_x[ri] - left_x[li])

    avrg_width_m = (sum(lane_width_pxl)/10) * xm_per_pix

    if ((min_lane_width - 1) < avrg_width_m < (min_lane_width + 1)):
        lanes_spacing = True
    else:
        lanes_spacing = False

    ## 3- Lane lines parallelism check
    left_slope_list = []
    right_slope_list = []
    for i in range(10):
        li = random.randint(0, len(left_x) - 1)
        ri = random.randint(0, len(right_x) - 1)
        # NOTE(review): the fits are x = f(y), so dx/dy should arguably be
        # evaluated at y values; evaluating at x values reproduces the
        # original behavior — confirm intent before changing.
        left_slope_list.append(2*left_fit[0]*left_x[li] + left_fit[1])
        right_slope_list.append(2*right_fit[0]*right_x[ri] + right_fit[1])

    left_slope = sum(left_slope_list)/len(left_slope_list)
    right_slope = sum(right_slope_list)/len(right_slope_list)

    if np.float32(abs(np.float32(left_slope - right_slope))) < 0.5:
        slope_check = True
    else:
        slope_check = False

    return curvature_check, lanes_spacing, slope_check
In [61]:
# Define a class to receive the characteristics of each line detection
class Line():
    """Holds the lane-detection state carried from one video frame to the next."""

    def __init__(self):
        # Whether the previous frame's detection passed the sanity checks.
        self.detected = False
        # Fitted x values from the last n frames, per line.
        self.recent_xleftfitted = []
        self.recent_xrightfitted = []
        # Smoothed (averaged) fitted x values over the last n frames.
        self.bestx_left = []
        self.bestx_right = []
        # Per-frame polynomial coefficients for each line ...
        self.rightfit = []
        self.leftfit = []
        # ... and their smoothed counterparts.
        self.best_rightfit = []
        self.best_leftfit = []
        # Pixel indices assigned to each line by the window search.
        self.left_lane_inds = []
        self.right_lane_inds = []
        # Most recent raw polynomial fit.
        self.current_fit = [np.array([False])]
        # Radius of curvature in meters; 500 is just a plausible initial value.
        self.radius_of_curvature = 500
        # Distance in meters of the vehicle center from the line.
        self.line_base_pos = []
        # Coefficient differences between the last and new fits.
        self.diffs = np.array([0, 0, 0], dtype='float')
        # x / y values of the detected line pixels.
        self.allx = []
        self.ally = []
        # Frames accumulated toward the smoothing window.
        self.iter_cntr = 0
        # Consecutive sanity-check failure counter.
        self.failed = 0
In [25]:
## Line object shared across frames to carry the latest detections
line = Line()

def lane_detect_withSMOOTHING(img):
    """Full per-frame lane detection with n-frame smoothing of the fits.

    Runs the blind sliding-window search whenever the previous frame failed
    the sanity checks, otherwise searches around the previous (smoothed)
    polynomials. The fits and fitted x values of the last
    ``smoothing_frames_nb`` frames are averaged before drawing.

    Note: in the original, the ``else`` branch of the smoothing step was
    unindented — a SyntaxError — which is fixed here; the identical
    bookkeeping that followed both search branches is also merged.
    """
    ## Number of recent frame detections smoothed with the new detection
    smoothing_frames_nb = 10

    ## Camera is already calibrated; preprocess the frame.
    processed_frame, undistorted_img, Minv = frame_processor(img)

    ## If the previous detections passed the sanity checks, search around the
    ## saved polynomials; otherwise restart a blind search from scratch.
    if line.detected is False:
        (_, ploty, left_fit, right_fit, left_x, right_x,
         line.left_lane_inds, line.right_lane_inds) = blind_slide_window_detector(processed_frame)
    else:
        (_, ploty, left_fit, right_fit, left_x, right_x,
         line.left_lane_inds, line.right_lane_inds) = slide_window_detector(
            processed_frame, np.asarray(line.best_leftfit), np.asarray(line.best_rightfit))

    line.recent_xleftfitted.append(left_x)
    line.recent_xrightfitted.append(right_x)
    line.leftfit.append(left_fit)
    line.rightfit.append(right_fit)
    line.ally = ploty
    line.iter_cntr += 1

    # Smooth over the last n frames once enough have accumulated; until then
    # just use the newest detection directly.
    if line.iter_cntr > smoothing_frames_nb:
        for i in range(smoothing_frames_nb):
            line.bestx_left = [m+n for (m, n) in zip(line.bestx_left, line.recent_xleftfitted[-(i+1)])]
            line.bestx_right = [m+n for (m, n) in zip(line.bestx_right, line.recent_xrightfitted[-(i+1)])]
            line.best_leftfit = [m+n for (m, n) in zip(line.best_leftfit, line.leftfit[-(i+1)])]
            line.best_rightfit = [m+n for (m, n) in zip(line.best_rightfit, line.rightfit[-(i+1)])]

        line.bestx_left = [x/smoothing_frames_nb for x in line.bestx_left]
        line.bestx_right = [x/smoothing_frames_nb for x in line.bestx_right]
        line.best_leftfit = [x/smoothing_frames_nb for x in line.best_leftfit]
        line.best_rightfit = [x/smoothing_frames_nb for x in line.best_rightfit]
    else:
        line.bestx_left = line.recent_xleftfitted[line.iter_cntr-1]
        line.bestx_right = line.recent_xrightfitted[line.iter_cntr-1]
        line.best_leftfit = line.leftfit[line.iter_cntr-1]
        line.best_rightfit = line.rightfit[line.iter_cntr-1]

    ## Updating radius of curvature & highlighting the lane area
    prev_lane_curvature = line.radius_of_curvature
    lane_img = lane_highlight(processed_frame, undistorted_img, line.ally,
                              np.asarray(line.bestx_left), np.asarray(line.bestx_right), Minv)
    current_lane_curvature, lane_img = lane_curvature(
        lane_img, line.ally, np.asarray(line.bestx_left), np.asarray(line.bestx_right))
    line.radius_of_curvature = current_lane_curvature

    ## Sanity checks steer the NEXT frame's search strategy. The spacing and
    ## slope checks are noisier than the curvature check, so passing either
    ## of them (together with the curvature check) is accepted.
    curvature_check, lanes_spacing, slope_check = sanity_checks(
        current_lane_curvature, prev_lane_curvature,
        np.asarray(line.best_leftfit), np.asarray(line.best_rightfit),
        line.bestx_left, line.bestx_right)

    line.detected = bool(curvature_check and (lanes_spacing or slope_check))

    ## Drop stale history continuously so the buffers don't grow unbounded.
    if line.iter_cntr > (2*smoothing_frames_nb):
        line.recent_xleftfitted = line.recent_xleftfitted[smoothing_frames_nb::]
        line.recent_xrightfitted = line.recent_xrightfitted[smoothing_frames_nb::]
        line.leftfit = line.leftfit[smoothing_frames_nb::]
        line.rightfit = line.rightfit[smoothing_frames_nb::]
        line.iter_cntr = 0

    return lane_img
In [29]:
## Creating line object to save the latest detections
line = Line()

## This API performs the lane detection without the smoothing process, and it is what I finally used, as I
#didn't experience a noticeable enhancement from the smoothing step.

def lane_detect(img):
    """Run the full lane-detection pipeline on a single video frame.

    Steps:
      1. Undistort and threshold the frame (``frame_processor``).
      2. Locate lane-line pixels: a blind sliding-window search when the
         previous frame's detection failed, otherwise a targeted search
         around the previous polynomial fits.
      3. Highlight the lane area and annotate the radius of curvature,
         warping the result back onto the undistorted frame.
      4. Run sanity checks on the new fit to decide whether the NEXT
         frame may reuse it or must fall back to a blind search.

    Uses the module-level ``line`` object to carry state between frames.

    Parameters:
        img: raw RGB video frame.

    Returns:
        Annotated RGB image with the detected lane area highlighted.
    """
    ## Camera is now calibrated and the frame is ready for the preprocessing
    processed_frame, undistorted_img, Minv = frame_processor(img)

    if not line.detected:
        # No trusted previous fit: start a blind sliding-window search.
        result_imgBlind, plotyBlind, left_fitBlind, right_fitBlind, left_xBlind, right_xBlind, line.left_lane_inds, line.right_lane_inds = blind_slide_window_detector(processed_frame)

        line.bestx_left = left_xBlind
        line.bestx_right = right_xBlind

        line.best_leftfit = left_fitBlind
        line.best_rightfit = right_fitBlind

        line.ally = plotyBlind

    else:
        # Previous fit passed the sanity checks: search only around it.
        result_img, ploty, left_fit, right_fit, left_x, right_x, line.left_lane_inds, line.right_lane_inds = slide_window_detector(processed_frame, np.asarray(line.best_leftfit), np.asarray(line.best_rightfit))

        line.bestx_left = left_x
        line.bestx_right = right_x

        line.best_leftfit = left_fit
        line.best_rightfit = right_fit

        line.ally = ploty

    ## Updating RoC & Lane Highlighting
    prev_lane_curvature = line.radius_of_curvature
    lane_img = lane_highlight(processed_frame, undistorted_img, line.ally, np.asarray(line.bestx_left), np.asarray(line.bestx_right), Minv)
    current_lane_curvature, lane_img = lane_curvature(lane_img, line.ally, np.asarray(line.bestx_left), np.asarray(line.bestx_right))
    line.radius_of_curvature = current_lane_curvature

    ## Sanity checks for the next frame processing
    curvature_check, lanes_spacing, slope_check = sanity_checks(current_lane_curvature, prev_lane_curvature, np.asarray(line.best_leftfit), np.asarray(line.best_rightfit), line.bestx_left, line.bestx_right)

    ## Deciding whether to start the window blind search again or not.
    # Logical and/or (rather than bitwise &/|) on the boolean sanity flags;
    # bool() keeps line.detected a plain Python bool even if the checks
    # return NumPy booleans.
    line.detected = bool(curvature_check and (lanes_spacing or slope_check))

    return lane_img
In [30]:
## Specifying the output file
white_output = 'test_videos_output/project_output.mp4'
##Passing in the project video 
clip1 = VideoFileClip("project_video.mp4")
##Run the Lane Detection pipeline on each frame
# fl_image applies lane_detect to every frame and returns a new clip.
white_clip = clip1.fl_image(lane_detect) 
# NOTE: %time is an IPython magic (notebook-only); it times the render below.
%time white_clip.write_videofile(white_output, audio=False)
[MoviePy] >>>> Building video test_videos_output/project_26.mp4
[MoviePy] Writing video test_videos_output/project_26.mp4
  0%|                                                  | 0/351 [00:00<?, ?it/s]
  0%|                                          | 1/351 [00:00<01:40,  3.47it/s]
  1%|▏                                         | 2/351 [00:00<01:43,  3.36it/s]
  1%|▎                                         | 3/351 [00:00<01:40,  3.46it/s]
  1%|▍                                         | 4/351 [00:01<01:38,  3.53it/s]
  1%|▌                                         | 5/351 [00:01<01:39,  3.48it/s]
  2%|▋                                         | 6/351 [00:01<01:39,  3.46it/s]
  2%|▊                                         | 7/351 [00:02<01:38,  3.48it/s]
  2%|▉                                         | 8/351 [00:02<01:38,  3.49it/s]
  3%|█                                         | 9/351 [00:02<01:37,  3.52it/s]
  3%|█▏                                       | 10/351 [00:02<01:36,  3.54it/s]
  3%|█▎                                       | 11/351 [00:03<01:37,  3.48it/s]
  3%|█▍                                       | 12/351 [00:03<01:36,  3.51it/s]
  4%|█▌                                       | 13/351 [00:03<01:34,  3.57it/s]
  4%|█▋                                       | 14/351 [00:03<01:33,  3.61it/s]
  4%|█▊                                       | 15/351 [00:04<01:34,  3.56it/s]
  5%|█▊                                       | 16/351 [00:04<01:32,  3.61it/s]
  5%|█▉                                       | 17/351 [00:04<01:34,  3.53it/s]
  5%|██                                       | 18/351 [00:05<01:35,  3.49it/s]
  5%|██▏                                      | 19/351 [00:05<01:35,  3.46it/s]
  6%|██▎                                      | 20/351 [00:05<01:41,  3.25it/s]
  6%|██▍                                      | 21/351 [00:06<01:39,  3.31it/s]
  6%|██▌                                      | 22/351 [00:06<01:40,  3.28it/s]
  7%|██▋                                      | 23/351 [00:06<01:37,  3.37it/s]
  7%|██▊                                      | 24/351 [00:06<01:38,  3.34it/s]
  7%|██▉                                      | 25/351 [00:07<01:35,  3.40it/s]
  7%|███                                      | 26/351 [00:07<01:34,  3.43it/s]
  8%|███▏                                     | 27/351 [00:07<01:33,  3.46it/s]
  8%|███▎                                     | 28/351 [00:08<01:32,  3.51it/s]
  8%|███▍                                     | 29/351 [00:08<01:30,  3.55it/s]
  9%|███▌                                     | 30/351 [00:08<01:30,  3.54it/s]
  9%|███▌                                     | 31/351 [00:08<01:29,  3.56it/s]
  9%|███▋                                     | 32/351 [00:09<01:29,  3.56it/s]
  9%|███▊                                     | 33/351 [00:09<01:29,  3.57it/s]
 10%|███▉                                     | 34/351 [00:09<01:28,  3.58it/s]
 10%|████                                     | 35/351 [00:10<01:27,  3.60it/s]
 10%|████▏                                    | 36/351 [00:10<01:28,  3.55it/s]
 11%|████▎                                    | 37/351 [00:10<01:28,  3.56it/s]
 11%|████▍                                    | 38/351 [00:10<01:27,  3.59it/s]
 11%|████▌                                    | 39/351 [00:11<01:28,  3.51it/s]
 11%|████▋                                    | 40/351 [00:11<01:28,  3.53it/s]
 12%|████▊                                    | 41/351 [00:11<01:28,  3.51it/s]
 12%|████▉                                    | 42/351 [00:12<01:28,  3.49it/s]
 12%|█████                                    | 43/351 [00:12<01:31,  3.35it/s]
 13%|█████▏                                   | 44/351 [00:12<01:29,  3.43it/s]
 13%|█████▎                                   | 45/351 [00:12<01:27,  3.48it/s]
 13%|█████▎                                   | 46/351 [00:13<01:27,  3.48it/s]
 13%|█████▍                                   | 47/351 [00:13<01:26,  3.50it/s]
 14%|█████▌                                   | 48/351 [00:13<01:27,  3.44it/s]
 14%|█████▋                                   | 49/351 [00:14<01:27,  3.44it/s]
 14%|█████▊                                   | 50/351 [00:14<01:26,  3.48it/s]
 15%|█████▉                                   | 51/351 [00:14<01:25,  3.49it/s]
 15%|██████                                   | 52/351 [00:14<01:26,  3.46it/s]
 15%|██████▏                                  | 53/351 [00:15<01:26,  3.46it/s]
 15%|██████▎                                  | 54/351 [00:15<01:25,  3.46it/s]
 16%|██████▍                                  | 55/351 [00:15<01:26,  3.43it/s]
 16%|██████▌                                  | 56/351 [00:16<01:26,  3.40it/s]
 16%|██████▋                                  | 57/351 [00:16<01:26,  3.40it/s]
 17%|██████▊                                  | 58/351 [00:16<01:25,  3.42it/s]
 17%|██████▉                                  | 59/351 [00:16<01:24,  3.47it/s]
 17%|███████                                  | 60/351 [00:17<01:24,  3.43it/s]
 17%|███████▏                                 | 61/351 [00:17<01:23,  3.46it/s]
 18%|███████▏                                 | 62/351 [00:17<01:23,  3.48it/s]
 18%|███████▎                                 | 63/351 [00:18<01:23,  3.45it/s]
 18%|███████▍                                 | 64/351 [00:18<01:24,  3.42it/s]
 19%|███████▌                                 | 65/351 [00:18<01:27,  3.28it/s]
 19%|███████▋                                 | 66/351 [00:19<01:26,  3.31it/s]
 19%|███████▊                                 | 67/351 [00:19<01:24,  3.35it/s]
 19%|███████▉                                 | 68/351 [00:19<01:22,  3.42it/s]
 20%|████████                                 | 69/351 [00:19<01:22,  3.43it/s]
 20%|████████▏                                | 70/351 [00:20<01:21,  3.46it/s]
 20%|████████▎                                | 71/351 [00:20<01:21,  3.45it/s]
 21%|████████▍                                | 72/351 [00:20<01:21,  3.43it/s]
 21%|████████▌                                | 73/351 [00:21<01:23,  3.32it/s]
 21%|████████▋                                | 74/351 [00:21<01:25,  3.23it/s]
 21%|████████▊                                | 75/351 [00:21<01:25,  3.22it/s]
 22%|████████▉                                | 76/351 [00:22<01:25,  3.23it/s]
 22%|████████▉                                | 77/351 [00:22<01:24,  3.23it/s]
 22%|█████████                                | 78/351 [00:22<01:24,  3.22it/s]
 23%|█████████▏                               | 79/351 [00:22<01:23,  3.26it/s]
 23%|█████████▎                               | 80/351 [00:23<01:22,  3.29it/s]
 23%|█████████▍                               | 81/351 [00:23<01:21,  3.30it/s]
 23%|█████████▌                               | 82/351 [00:23<01:20,  3.32it/s]
 24%|█████████▋                               | 83/351 [00:24<01:19,  3.38it/s]
 24%|█████████▊                               | 84/351 [00:24<01:18,  3.39it/s]
 24%|█████████▉                               | 85/351 [00:24<01:19,  3.36it/s]
 25%|██████████                               | 86/351 [00:25<01:18,  3.38it/s]
 25%|██████████▏                              | 87/351 [00:25<01:17,  3.41it/s]
 25%|██████████▎                              | 88/351 [00:25<01:16,  3.43it/s]
 25%|██████████▍                              | 89/351 [00:25<01:16,  3.43it/s]
 26%|██████████▌                              | 90/351 [00:26<01:15,  3.44it/s]
 26%|██████████▋                              | 91/351 [00:26<01:15,  3.44it/s]
 26%|██████████▋                              | 92/351 [00:26<01:15,  3.43it/s]
 26%|██████████▊                              | 93/351 [00:27<01:14,  3.45it/s]
 27%|██████████▉                              | 94/351 [00:27<01:14,  3.45it/s]
 27%|███████████                              | 95/351 [00:27<01:14,  3.45it/s]
 27%|███████████▏                             | 96/351 [00:27<01:14,  3.43it/s]
 28%|███████████▎                             | 97/351 [00:28<01:14,  3.40it/s]
 28%|███████████▍                             | 98/351 [00:28<01:14,  3.41it/s]
 28%|███████████▌                             | 99/351 [00:28<01:13,  3.45it/s]
 28%|███████████▍                            | 100/351 [00:29<01:14,  3.36it/s]
 29%|███████████▌                            | 101/351 [00:29<01:15,  3.30it/s]
 29%|███████████▌                            | 102/351 [00:29<01:14,  3.35it/s]
 29%|███████████▋                            | 103/351 [00:30<01:14,  3.35it/s]
 30%|███████████▊                            | 104/351 [00:30<01:13,  3.38it/s]
 30%|███████████▉                            | 105/351 [00:30<01:13,  3.33it/s]
 30%|████████████                            | 106/351 [00:30<01:13,  3.34it/s]
 30%|████████████▏                           | 107/351 [00:31<01:12,  3.36it/s]
 31%|████████████▎                           | 108/351 [00:31<01:11,  3.40it/s]
 31%|████████████▍                           | 109/351 [00:31<01:10,  3.43it/s]
 31%|████████████▌                           | 110/351 [00:32<01:10,  3.42it/s]
 32%|████████████▋                           | 111/351 [00:32<01:10,  3.39it/s]
 32%|████████████▊                           | 112/351 [00:32<01:13,  3.26it/s]
 32%|████████████▉                           | 113/351 [00:33<01:11,  3.32it/s]
 32%|████████████▉                           | 114/351 [00:33<01:11,  3.33it/s]
 33%|█████████████                           | 115/351 [00:33<01:10,  3.33it/s]
 33%|█████████████▏                          | 116/351 [00:33<01:11,  3.31it/s]
 33%|█████████████▎                          | 117/351 [00:34<01:09,  3.38it/s]
 34%|█████████████▍                          | 118/351 [00:34<01:09,  3.36it/s]
 34%|█████████████▌                          | 119/351 [00:34<01:08,  3.39it/s]
 34%|█████████████▋                          | 120/351 [00:35<01:07,  3.42it/s]
 34%|█████████████▊                          | 121/351 [00:35<01:06,  3.44it/s]
 35%|█████████████▉                          | 122/351 [00:35<01:06,  3.43it/s]
 35%|██████████████                          | 123/351 [00:35<01:06,  3.44it/s]
 35%|██████████████▏                         | 124/351 [00:36<01:05,  3.49it/s]
 36%|██████████████▏                         | 125/351 [00:36<01:05,  3.45it/s]
 36%|██████████████▎                         | 126/351 [00:36<01:05,  3.45it/s]
 36%|██████████████▍                         | 127/351 [00:37<01:03,  3.50it/s]
 36%|██████████████▌                         | 128/351 [00:37<01:03,  3.49it/s]
 37%|██████████████▋                         | 129/351 [00:37<01:04,  3.46it/s]
 37%|██████████████▊                         | 130/351 [00:37<01:03,  3.47it/s]
 37%|██████████████▉                         | 131/351 [00:38<01:05,  3.36it/s]
 38%|███████████████                         | 132/351 [00:38<01:04,  3.39it/s]
 38%|███████████████▏                        | 133/351 [00:38<01:04,  3.36it/s]
 38%|███████████████▎                        | 134/351 [00:39<01:04,  3.34it/s]
 38%|███████████████▍                        | 135/351 [00:39<01:03,  3.40it/s]
 39%|███████████████▍                        | 136/351 [00:39<01:03,  3.36it/s]
 39%|███████████████▌                        | 137/351 [00:40<01:02,  3.43it/s]
 39%|███████████████▋                        | 138/351 [00:40<01:02,  3.42it/s]
 40%|███████████████▊                        | 139/351 [00:40<01:01,  3.45it/s]
 40%|███████████████▉                        | 140/351 [00:40<01:00,  3.46it/s]
 40%|████████████████                        | 141/351 [00:41<01:00,  3.49it/s]
 40%|████████████████▏                       | 142/351 [00:41<00:59,  3.51it/s]
 41%|████████████████▎                       | 143/351 [00:41<00:59,  3.50it/s]
 41%|████████████████▍                       | 144/351 [00:42<00:59,  3.46it/s]
 41%|████████████████▌                       | 145/351 [00:42<00:59,  3.45it/s]
 42%|████████████████▋                       | 146/351 [00:42<00:58,  3.48it/s]
 42%|████████████████▊                       | 147/351 [00:42<00:58,  3.46it/s]
 42%|████████████████▊                       | 148/351 [00:43<00:58,  3.44it/s]
 42%|████████████████▉                       | 149/351 [00:43<00:58,  3.45it/s]
 43%|█████████████████                       | 150/351 [00:43<00:58,  3.42it/s]
 43%|█████████████████▏                      | 151/351 [00:44<00:58,  3.41it/s]
 43%|█████████████████▎                      | 152/351 [00:44<00:58,  3.43it/s]
 44%|█████████████████▍                      | 153/351 [00:44<00:56,  3.48it/s]
 44%|█████████████████▌                      | 154/351 [00:44<00:56,  3.46it/s]
 44%|█████████████████▋                      | 155/351 [00:45<00:57,  3.40it/s]
 44%|█████████████████▊                      | 156/351 [00:45<00:57,  3.41it/s]
 45%|█████████████████▉                      | 157/351 [00:45<00:58,  3.34it/s]
 45%|██████████████████                      | 158/351 [00:46<00:57,  3.33it/s]
 45%|██████████████████                      | 159/351 [00:46<00:57,  3.35it/s]
 46%|██████████████████▏                     | 160/351 [00:46<00:57,  3.33it/s]
 46%|██████████████████▎                     | 161/351 [00:47<00:56,  3.37it/s]
 46%|██████████████████▍                     | 162/351 [00:47<00:56,  3.34it/s]
 46%|██████████████████▌                     | 163/351 [00:47<00:56,  3.32it/s]
 47%|██████████████████▋                     | 164/351 [00:48<00:59,  3.14it/s]
 47%|██████████████████▊                     | 165/351 [00:48<00:58,  3.17it/s]
 47%|██████████████████▉                     | 166/351 [00:48<00:59,  3.11it/s]
 48%|███████████████████                     | 167/351 [00:48<00:57,  3.18it/s]
 48%|███████████████████▏                    | 168/351 [00:49<00:58,  3.13it/s]
 48%|███████████████████▎                    | 169/351 [00:49<00:57,  3.16it/s]
 48%|███████████████████▎                    | 170/351 [00:49<01:00,  3.00it/s]
 49%|███████████████████▍                    | 171/351 [00:50<01:01,  2.94it/s]
 49%|███████████████████▌                    | 172/351 [00:50<00:58,  3.04it/s]
 49%|███████████████████▋                    | 173/351 [00:50<00:57,  3.07it/s]
 50%|███████████████████▊                    | 174/351 [00:51<00:57,  3.08it/s]
 50%|███████████████████▉                    | 175/351 [00:51<00:56,  3.09it/s]
 50%|████████████████████                    | 176/351 [00:51<00:54,  3.20it/s]
 50%|████████████████████▏                   | 177/351 [00:52<00:55,  3.16it/s]
 51%|████████████████████▎                   | 178/351 [00:52<00:53,  3.26it/s]
 51%|████████████████████▍                   | 179/351 [00:52<00:53,  3.23it/s]
 51%|████████████████████▌                   | 180/351 [00:53<00:52,  3.28it/s]
 52%|████████████████████▋                   | 181/351 [00:53<00:52,  3.26it/s]
 52%|████████████████████▋                   | 182/351 [00:53<00:50,  3.36it/s]
 52%|████████████████████▊                   | 183/351 [00:53<00:50,  3.35it/s]
 52%|████████████████████▉                   | 184/351 [00:54<00:49,  3.40it/s]
 53%|█████████████████████                   | 185/351 [00:54<00:49,  3.39it/s]
 53%|█████████████████████▏                  | 186/351 [00:54<00:48,  3.41it/s]
 53%|█████████████████████▎                  | 187/351 [00:55<00:48,  3.35it/s]
 54%|█████████████████████▍                  | 188/351 [00:55<00:48,  3.37it/s]
 54%|█████████████████████▌                  | 189/351 [00:55<00:47,  3.39it/s]
 54%|█████████████████████▋                  | 190/351 [00:56<00:46,  3.43it/s]
 54%|█████████████████████▊                  | 191/351 [00:56<00:47,  3.39it/s]
 55%|█████████████████████▉                  | 192/351 [00:56<00:47,  3.38it/s]
 55%|█████████████████████▉                  | 193/351 [00:56<00:47,  3.34it/s]
 55%|██████████████████████                  | 194/351 [00:57<00:50,  3.10it/s]
 56%|██████████████████████▏                 | 195/351 [00:57<00:52,  2.98it/s]
 56%|██████████████████████▎                 | 196/351 [00:58<00:51,  2.99it/s]
 56%|██████████████████████▍                 | 197/351 [00:58<00:51,  2.98it/s]
 56%|██████████████████████▌                 | 198/351 [00:58<00:50,  3.05it/s]
 57%|██████████████████████▋                 | 199/351 [00:58<00:49,  3.10it/s]
 57%|██████████████████████▊                 | 200/351 [00:59<00:47,  3.20it/s]
 57%|██████████████████████▉                 | 201/351 [00:59<00:46,  3.21it/s]
 58%|███████████████████████                 | 202/351 [00:59<00:47,  3.15it/s]
 58%|███████████████████████▏                | 203/351 [01:00<00:47,  3.13it/s]
 58%|███████████████████████▏                | 204/351 [01:00<00:45,  3.23it/s]
 58%|███████████████████████▎                | 205/351 [01:00<00:44,  3.25it/s]
 59%|███████████████████████▍                | 206/351 [01:01<00:43,  3.33it/s]
 59%|███████████████████████▌                | 207/351 [01:01<00:43,  3.29it/s]
 59%|███████████████████████▋                | 208/351 [01:01<00:42,  3.38it/s]
 60%|███████████████████████▊                | 209/351 [01:01<00:42,  3.37it/s]
 60%|███████████████████████▉                | 210/351 [01:02<00:41,  3.42it/s]
 60%|████████████████████████                | 211/351 [01:02<00:41,  3.34it/s]
 60%|████████████████████████▏               | 212/351 [01:02<00:40,  3.40it/s]
 61%|████████████████████████▎               | 213/351 [01:03<00:41,  3.35it/s]
 61%|████████████████████████▍               | 214/351 [01:03<00:41,  3.33it/s]
 61%|████████████████████████▌               | 215/351 [01:03<00:41,  3.24it/s]
 62%|████████████████████████▌               | 216/351 [01:04<00:41,  3.29it/s]
 62%|████████████████████████▋               | 217/351 [01:04<00:41,  3.23it/s]
 62%|████████████████████████▊               | 218/351 [01:04<00:40,  3.29it/s]
 62%|████████████████████████▉               | 219/351 [01:05<00:40,  3.25it/s]
 63%|█████████████████████████               | 220/351 [01:05<00:39,  3.31it/s]
 63%|█████████████████████████▏              | 221/351 [01:05<00:40,  3.20it/s]
 63%|█████████████████████████▎              | 222/351 [01:05<00:39,  3.29it/s]
 64%|█████████████████████████▍              | 223/351 [01:06<00:39,  3.22it/s]
 64%|█████████████████████████▌              | 224/351 [01:06<00:38,  3.31it/s]
 64%|█████████████████████████▋              | 225/351 [01:06<00:38,  3.30it/s]
 64%|█████████████████████████▊              | 226/351 [01:07<00:38,  3.28it/s]
 65%|█████████████████████████▊              | 227/351 [01:07<00:37,  3.32it/s]
 65%|█████████████████████████▉              | 228/351 [01:07<00:37,  3.32it/s]
 65%|██████████████████████████              | 229/351 [01:08<00:36,  3.33it/s]
 66%|██████████████████████████▏             | 230/351 [01:08<00:35,  3.37it/s]
 66%|██████████████████████████▎             | 231/351 [01:08<00:35,  3.40it/s]
 66%|██████████████████████████▍             | 232/351 [01:08<00:34,  3.44it/s]
 66%|██████████████████████████▌             | 233/351 [01:09<00:33,  3.50it/s]
 67%|██████████████████████████▋             | 234/351 [01:09<00:33,  3.51it/s]
 67%|██████████████████████████▊             | 235/351 [01:09<00:33,  3.48it/s]
 67%|██████████████████████████▉             | 236/351 [01:10<00:33,  3.48it/s]
 68%|███████████████████████████             | 237/351 [01:10<00:32,  3.47it/s]
 68%|███████████████████████████             | 238/351 [01:10<00:32,  3.49it/s]
 68%|███████████████████████████▏            | 239/351 [01:10<00:31,  3.52it/s]
 68%|███████████████████████████▎            | 240/351 [01:11<00:31,  3.55it/s]
 69%|███████████████████████████▍            | 241/351 [01:11<00:30,  3.57it/s]
 69%|███████████████████████████▌            | 242/351 [01:11<00:30,  3.58it/s]
 69%|███████████████████████████▋            | 243/351 [01:12<00:30,  3.58it/s]
 70%|███████████████████████████▊            | 244/351 [01:12<00:30,  3.54it/s]
 70%|███████████████████████████▉            | 245/351 [01:12<00:30,  3.46it/s]
 70%|████████████████████████████            | 246/351 [01:12<00:29,  3.51it/s]
 70%|████████████████████████████▏           | 247/351 [01:13<00:29,  3.49it/s]
 71%|████████████████████████████▎           | 248/351 [01:13<00:29,  3.48it/s]
 71%|████████████████████████████▍           | 249/351 [01:13<00:29,  3.40it/s]
 71%|████████████████████████████▍           | 250/351 [01:14<00:29,  3.43it/s]
 72%|████████████████████████████▌           | 251/351 [01:14<00:28,  3.51it/s]
 72%|████████████████████████████▋           | 252/351 [01:14<00:27,  3.54it/s]
 72%|████████████████████████████▊           | 253/351 [01:14<00:27,  3.57it/s]
 72%|████████████████████████████▉           | 254/351 [01:15<00:27,  3.57it/s]
 73%|█████████████████████████████           | 255/351 [01:15<00:27,  3.55it/s]
 73%|█████████████████████████████▏          | 256/351 [01:15<00:26,  3.53it/s]
 73%|█████████████████████████████▎          | 257/351 [01:16<00:26,  3.56it/s]
 74%|█████████████████████████████▍          | 258/351 [01:16<00:25,  3.58it/s]
 74%|█████████████████████████████▌          | 259/351 [01:16<00:26,  3.53it/s]
 74%|█████████████████████████████▋          | 260/351 [01:16<00:26,  3.43it/s]
 74%|█████████████████████████████▋          | 261/351 [01:17<00:25,  3.48it/s]
 75%|█████████████████████████████▊          | 262/351 [01:17<00:25,  3.47it/s]
 75%|█████████████████████████████▉          | 263/351 [01:17<00:25,  3.42it/s]
 75%|██████████████████████████████          | 264/351 [01:18<00:25,  3.44it/s]
 75%|██████████████████████████████▏         | 265/351 [01:18<00:24,  3.48it/s]
 76%|██████████████████████████████▎         | 266/351 [01:18<00:24,  3.44it/s]
 76%|██████████████████████████████▍         | 267/351 [01:18<00:25,  3.36it/s]
 76%|██████████████████████████████▌         | 268/351 [01:19<00:24,  3.33it/s]
 77%|██████████████████████████████▋         | 269/351 [01:19<00:24,  3.32it/s]
 77%|██████████████████████████████▊         | 270/351 [01:19<00:23,  3.38it/s]
 77%|██████████████████████████████▉         | 271/351 [01:20<00:23,  3.39it/s]
 77%|██████████████████████████████▉         | 272/351 [01:20<00:23,  3.39it/s]
 78%|███████████████████████████████         | 273/351 [01:20<00:24,  3.14it/s]
 78%|███████████████████████████████▏        | 274/351 [01:21<00:23,  3.23it/s]
 78%|███████████████████████████████▎        | 275/351 [01:21<00:23,  3.25it/s]
 79%|███████████████████████████████▍        | 276/351 [01:21<00:22,  3.29it/s]
 79%|███████████████████████████████▌        | 277/351 [01:22<00:23,  3.20it/s]
 79%|███████████████████████████████▋        | 278/351 [01:22<00:22,  3.26it/s]
 79%|███████████████████████████████▊        | 279/351 [01:22<00:21,  3.31it/s]
 80%|███████████████████████████████▉        | 280/351 [01:22<00:21,  3.28it/s]
 80%|████████████████████████████████        | 281/351 [01:23<00:22,  3.15it/s]
 80%|████████████████████████████████▏       | 282/351 [01:23<00:23,  2.99it/s]
 81%|████████████████████████████████▎       | 283/351 [01:23<00:22,  3.04it/s]
 81%|████████████████████████████████▎       | 284/351 [01:24<00:21,  3.10it/s]
 81%|████████████████████████████████▍       | 285/351 [01:24<00:20,  3.16it/s]
 81%|████████████████████████████████▌       | 286/351 [01:24<00:20,  3.21it/s]
 82%|████████████████████████████████▋       | 287/351 [01:25<00:19,  3.29it/s]
 82%|████████████████████████████████▊       | 288/351 [01:25<00:18,  3.38it/s]
 82%|████████████████████████████████▉       | 289/351 [01:25<00:18,  3.30it/s]
 83%|█████████████████████████████████       | 290/351 [01:26<00:19,  3.20it/s]
 83%|█████████████████████████████████▏      | 291/351 [01:26<00:19,  3.15it/s]
 83%|█████████████████████████████████▎      | 292/351 [01:26<00:18,  3.21it/s]
 83%|█████████████████████████████████▍      | 293/351 [01:26<00:17,  3.34it/s]
 84%|█████████████████████████████████▌      | 294/351 [01:27<00:16,  3.40it/s]
 84%|█████████████████████████████████▌      | 295/351 [01:27<00:16,  3.44it/s]
 84%|█████████████████████████████████▋      | 296/351 [01:27<00:15,  3.48it/s]
 85%|█████████████████████████████████▊      | 297/351 [01:28<00:15,  3.46it/s]
 85%|█████████████████████████████████▉      | 298/351 [01:28<00:15,  3.49it/s]
 85%|██████████████████████████████████      | 299/351 [01:28<00:14,  3.49it/s]
 85%|██████████████████████████████████▏     | 300/351 [01:28<00:14,  3.51it/s]
 86%|██████████████████████████████████▎     | 301/351 [01:29<00:14,  3.44it/s]
 86%|██████████████████████████████████▍     | 302/351 [01:29<00:14,  3.50it/s]
 86%|██████████████████████████████████▌     | 303/351 [01:29<00:14,  3.37it/s]
 87%|██████████████████████████████████▋     | 304/351 [01:30<00:13,  3.45it/s]
 87%|██████████████████████████████████▊     | 305/351 [01:30<00:13,  3.52it/s]
 87%|██████████████████████████████████▊     | 306/351 [01:30<00:12,  3.55it/s]
 87%|██████████████████████████████████▉     | 307/351 [01:30<00:12,  3.54it/s]
 88%|███████████████████████████████████     | 308/351 [01:31<00:12,  3.54it/s]
 88%|███████████████████████████████████▏    | 309/351 [01:31<00:11,  3.50it/s]
 88%|███████████████████████████████████▎    | 310/351 [01:31<00:11,  3.55it/s]
 89%|███████████████████████████████████▍    | 311/351 [01:32<00:11,  3.47it/s]
 89%|███████████████████████████████████▌    | 312/351 [01:32<00:11,  3.53it/s]
 89%|███████████████████████████████████▋    | 313/351 [01:32<00:11,  3.41it/s]
 89%|███████████████████████████████████▊    | 314/351 [01:33<00:11,  3.29it/s]
 90%|███████████████████████████████████▉    | 315/351 [01:33<00:11,  3.20it/s]
 90%|████████████████████████████████████    | 316/351 [01:33<00:10,  3.27it/s]
 90%|████████████████████████████████████▏   | 317/351 [01:33<00:10,  3.32it/s]
 91%|████████████████████████████████████▏   | 318/351 [01:34<00:09,  3.41it/s]
 91%|████████████████████████████████████▎   | 319/351 [01:34<00:09,  3.44it/s]
 91%|████████████████████████████████████▍   | 320/351 [01:34<00:09,  3.36it/s]
 91%|████████████████████████████████████▌   | 321/351 [01:35<00:08,  3.45it/s]
 92%|████████████████████████████████████▋   | 322/351 [01:35<00:08,  3.39it/s]
 92%|████████████████████████████████████▊   | 323/351 [01:35<00:08,  3.44it/s]
 92%|████████████████████████████████████▉   | 324/351 [01:35<00:07,  3.48it/s]
 93%|█████████████████████████████████████   | 325/351 [01:36<00:07,  3.27it/s]
 93%|█████████████████████████████████████▏  | 326/351 [01:36<00:07,  3.31it/s]
 93%|█████████████████████████████████████▎  | 327/351 [01:36<00:07,  3.20it/s]
 93%|█████████████████████████████████████▍  | 328/351 [01:37<00:07,  3.18it/s]
 94%|█████████████████████████████████████▍  | 329/351 [01:37<00:06,  3.18it/s]
 94%|█████████████████████████████████████▌  | 330/351 [01:37<00:06,  3.27it/s]
 94%|█████████████████████████████████████▋  | 331/351 [01:38<00:05,  3.36it/s]
 95%|█████████████████████████████████████▊  | 332/351 [01:38<00:05,  3.19it/s]
 95%|█████████████████████████████████████▉  | 333/351 [01:38<00:05,  3.26it/s]
 95%|██████████████████████████████████████  | 334/351 [01:39<00:05,  3.32it/s]
 95%|██████████████████████████████████████▏ | 335/351 [01:39<00:04,  3.40it/s]
 96%|██████████████████████████████████████▎ | 336/351 [01:39<00:04,  3.43it/s]
 96%|██████████████████████████████████████▍ | 337/351 [01:39<00:04,  3.47it/s]
 96%|██████████████████████████████████████▌ | 338/351 [01:40<00:03,  3.36it/s]
 97%|██████████████████████████████████████▋ | 339/351 [01:40<00:03,  3.42it/s]
 97%|██████████████████████████████████████▋ | 340/351 [01:40<00:03,  3.48it/s]
 97%|██████████████████████████████████████▊ | 341/351 [01:41<00:02,  3.47it/s]
 97%|██████████████████████████████████████▉ | 342/351 [01:41<00:02,  3.48it/s]
 98%|███████████████████████████████████████ | 343/351 [01:41<00:02,  3.41it/s]
 98%|███████████████████████████████████████▏| 344/351 [01:41<00:02,  3.44it/s]
 98%|███████████████████████████████████████▎| 345/351 [01:42<00:01,  3.47it/s]
 99%|███████████████████████████████████████▍| 346/351 [01:42<00:01,  3.52it/s]
 99%|███████████████████████████████████████▌| 347/351 [01:42<00:01,  3.50it/s]
 99%|███████████████████████████████████████▋| 348/351 [01:43<00:00,  3.46it/s]
 99%|███████████████████████████████████████▊| 349/351 [01:43<00:00,  3.50it/s]
100%|███████████████████████████████████████▉| 350/351 [01:43<00:00,  3.51it/s]
[MoviePy] Done.
[MoviePy] >>>> Video ready: test_videos_output/project_26.mp4 

Wall time: 1min 45s

A full example of a test image going through my pipeline, displaying the

output after each step

In [100]:
plt.imshow(test_img_list[6])
Out[100]:
<matplotlib.image.AxesImage at 0x133b91d0>
In [96]:
# Step 1: undistort + threshold + perspective-transform the test image.
processed_frame, undistorted_img, Minv = frame_processor(test_img_list[6])
plt.imshow(processed_frame, cmap='gray')
Out[96]:
<matplotlib.image.AxesImage at 0xae1e080>
In [97]:
# Step 2: blind sliding-window search for lane pixels and polynomial fits.
out_img, ploty, left_fit, right_fit, left_fitx, right_fitx, left_lane_inds, right_lane_inds = blind_slide_window_detector(processed_frame)
plt.imshow(out_img, cmap='gray')
Out[97]:
<matplotlib.image.AxesImage at 0x11c5e4e0>
In [98]:
# Step 3: targeted search around the fits found by the blind search above.
result, ploty, left_fit, right_fit, left_x, right_x, left_lane_inds, right_lane_inds = slide_window_detector(processed_frame, left_fit, right_fit)
plt.imshow(result)
Out[98]:
<matplotlib.image.AxesImage at 0x14bdf160>
In [99]:
# Step 4: warp the highlighted lane back onto the undistorted frame and
# annotate the radius of curvature.
lane_img = lane_highlight(processed_frame, undistorted_img, ploty, left_x, right_x, Minv)
# lane_curvature returns (curvature, annotated image) — see its use in the
# pipeline above. The original unpacked these in the wrong order (img, curve)
# and then displayed the un-annotated image.
curve, lane_img = lane_curvature(lane_img, ploty, left_x, right_x)
plt.imshow(lane_img)
Out[99]:
<matplotlib.image.AxesImage at 0x18150c18>

You've reached the end of this Notebook! :D